pt_migrate(v);
}
+/*
+ * Re-target every guest-bound machine MSI whose destination vcpu is @v
+ * to @v's current physical processor.  Called after a vcpu migrates so
+ * that device MSIs follow it (see the hook added in vmx vcpu migration).
+ */
+void hvm_migrate_pirqs(struct vcpu *v)
+{
+ int pirq, irq;
+ struct irq_desc *desc;
+ struct domain *d = v->domain;
+ struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+
+ /* Nothing to do without an IOMMU or without any pass-through bindings. */
+ if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
+ return;
+
+ /* event_lock protects the dpci mapping bitmap and mirq[] entries. */
+ spin_lock(&d->event_lock);
+ /* Walk all pirqs currently mapped for this domain. */
+ for ( pirq = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
+ pirq < d->nr_pirqs;
+ pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) )
+ {
+ /* Only machine MSIs whose single destination is this vcpu. */
+ if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI) ||
+ (hvm_irq_dpci->mirq[pirq].gmsi.dest_vcpu_id != v->vcpu_id) )
+ continue;
+ /* Acquires desc->lock with IRQs disabled; NULL if pirq is unbound. */
+ desc = domain_spin_lock_irq_desc(v->domain, pirq, NULL);
+ if (!desc)
+ continue;
+ irq = desc - irq_desc;
+ ASSERT(MSI_IRQ(irq));
+ /* Steer the MSI at the pcpu the vcpu now runs on. */
+ desc->handler->set_affinity(irq, *cpumask_of(v->processor));
+ spin_unlock_irq(&desc->lock);
+ }
+ spin_unlock(&d->event_lock);
+}
+
void hvm_do_resume(struct vcpu *v)
{
ioreq_t *p;
return 1;
}
+/*
+ * Resolve a guest MSI destination (APIC dest id + dest mode) to a vcpu.
+ * Returns the vcpu_id when exactly one vcpu matches, -1 when the
+ * destination addresses multiple vcpus (no single target to migrate to).
+ */
+int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode)
+{
+ int dest_vcpu_id = -1, w = 0;
+ struct vcpu *v;
+
+ /* A single-vcpu domain can only ever target vcpu 0. */
+ if ( d->max_vcpus == 1 )
+ return 0;
+
+ /* Count vcpus whose local APIC matches the destination. */
+ for_each_vcpu ( d, v )
+ {
+ if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, dest_mode) )
+ {
+ w++;
+ dest_vcpu_id = v->vcpu_id;
+ }
+ }
+ /* More than one match: multi-destination MSI, caller must not migrate. */
+ if ( w > 1 )
+ return -1;
+
+ /* Either the unique match, or -1 if no vcpu matched at all. */
+ return dest_vcpu_id;
+}
+
/* MSI-X mask bit hypervisor interception */
struct msixtbl_entry
{
vmx_clear_vmcs(v);
vmx_load_vmcs(v);
hvm_migrate_timers(v);
+ hvm_migrate_pirqs(v);
vmx_set_host_env(v);
vpid_sync_vcpu_all(v);
}
/* Only show CPU0 - CPU31's affinity info.*/
printk(" IRQ:%4d, IRQ affinity:0x%08x, Vec:%3d type=%-15s"
" status=%08x mapped, unbound\n",
- irq, *(int*)cfg->domain.bits, cfg->vector,
+ irq, *(int*)desc->affinity.bits, cfg->vector,
desc->handler->typename, desc->status);
else
{
printk(" IRQ:%4d, IRQ affinity:0x%08x, Vec:%3d type=%-15s "
"status=%08x in-flight=%d domain-list=",
- irq, *(int*)cfg->domain.bits, cfg->vector,
+ irq, *(int*)desc->affinity.bits, cfg->vector,
desc->handler->typename, desc->status, action->in_flight);
for ( i = 0; i < action->nr_guests; i++ )
bitmap_zero(hvm_irq_dpci->mapping, d->nr_pirqs);
memset(hvm_irq_dpci->hvm_timer, 0,
nr_irqs * sizeof(*hvm_irq_dpci->hvm_timer));
- for ( int i = 0; i < d->nr_pirqs; i++ )
+ for ( int i = 0; i < d->nr_pirqs; i++ ) {
INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
+ hvm_irq_dpci->mirq[i].gmsi.dest_vcpu_id = -1;
+ }
for ( int i = 0; i < NR_HVM_IRQS; i++ )
INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]);
if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
{
+ uint8_t dest, dest_mode;
+ int dest_vcpu_id;
if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping))
{
hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
}
+ /* Calculate dest_vcpu_id for MSI-type pirq migration */
+ dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
+ dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
+ dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
+ hvm_irq_dpci->mirq[pirq].gmsi.dest_vcpu_id = dest_vcpu_id;
+ spin_unlock(&d->event_lock);
+ if ( dest_vcpu_id >= 0 )
+ hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
}
else
{
gdprintk(XENLOG_INFO VTDPREFIX,
"VT-d irq bind: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
+ spin_unlock(&d->event_lock);
}
- spin_unlock(&d->event_lock);
return 0;
}
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
u64 hvm_get_guest_time(struct vcpu *v);
+int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
+
#define hvm_paging_enabled(v) \
(!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
#define hvm_wp_enabled(v) \
unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
+void hvm_migrate_pirqs(struct vcpu *v);
static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
struct irq_desc;
extern void irq_complete_move(struct irq_desc **descp);
+extern struct irq_desc *irq_desc;
+
void lock_vector_lock(void);
void unlock_vector_lock(void);
struct hvm_gmsi_info {
    uint32_t gvec;     /* guest vector programmed for this MSI */
    uint32_t gflags;   /* guest MSI flags; encode dest id (VMSI_DEST_ID_MASK)
                        * and dest mode (VMSI_DM_MASK) */
+    int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
};
struct hvm_mirq_dpci_mapping {